Spring Framework 문서
- https://spring.io/docs/reference --> Spring Framework
- Spring Framework Reference Documentation, 4.3.25.RELEASE
rem Default JSSE options: 2048-bit ephemeral DH keys and an English locale.
rem NOTE: these lines were collapsed onto one line, which breaks batch syntax
rem (labels and goto targets must be on their own lines) — restored here.
if not "%JSSE_OPTS%" == "" goto gotJsseOpts
set "JSSE_OPTS=-Djdk.tls.ephemeralDHKeySize=2048 -Duser.language=en -Duser.region=US"
:gotJsseOpts
set "JAVA_OPTS=%JAVA_OPTS% %JSSE_OPTS%"
# encoding=utf8
"""
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)
BSD License
"""
import numpy as np
import codecs

# data I/O
with codecs.open('input.txt', 'r', encoding='utf-8') as fp:
    data = fp.read()
# data = open('input.txt', 'r').read() # should be simple plain text file
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print('data has %d characters, %d unique.' % (data_size, vocab_size))
char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}

# hyperparameters
hidden_size = 100  # size of hidden layer of neurons
seq_length = 25    # number of steps to unroll the RNN for
learning_rate = 1e-1

# model parameters
Wxh = np.random.randn(hidden_size, vocab_size) * 0.01   # input to hidden
Whh = np.random.randn(hidden_size, hidden_size) * 0.01  # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size) * 0.01   # hidden to output
bh = np.zeros((hidden_size, 1))  # hidden bias
by = np.zeros((vocab_size, 1))   # output bias


def lossFun(inputs, targets, hprev):
    """
    inputs,targets are both list of integers.
    hprev is Hx1 array of initial hidden state
    returns the loss, gradients on model parameters, and last hidden state
    """
    xs, hs, ys, ps = {}, {}, {}, {}
    hs[-1] = np.copy(hprev)
    loss = 0
    # forward pass
    for t in range(len(inputs)):
        xs[t] = np.zeros((vocab_size, 1))  # encode in 1-of-k representation
        xs[t][inputs[t]] = 1
        hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t - 1]) + bh)  # hidden state
        ys[t] = np.dot(Why, hs[t]) + by  # unnormalized log probabilities for next chars
        ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))  # probabilities for next chars
        loss += -np.log(ps[t][targets[t], 0])  # softmax (cross-entropy loss)
    # backward pass: compute gradients going backwards
    dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    dhnext = np.zeros_like(hs[0])
    for t in reversed(range(len(inputs))):
        dy = np.copy(ps[t])
        dy[targets[t]] -= 1  # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
        dWhy += np.dot(dy, hs[t].T)
        dby += dy
        dh = np.dot(Why.T, dy) + dhnext  # backprop into h
        dhraw = (1 - hs[t] * hs[t]) * dh  # backprop through tanh nonlinearity
        dbh += dhraw
        dWxh += np.dot(dhraw, xs[t].T)
        dWhh += np.dot(dhraw, hs[t - 1].T)
        dhnext = np.dot(Whh.T, dhraw)
    for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
        np.clip(dparam, -5, 5, out=dparam)  # clip to mitigate exploding gradients
    return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs) - 1]


def sample(h, seed_ix, n):
    """
    sample a sequence of integers from the model
    h is memory state, seed_ix is seed letter for first time step
    """
    x = np.zeros((vocab_size, 1))
    x[seed_ix] = 1
    ixes = []
    for t in range(n):
        h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
        y = np.dot(Why, h) + by
        p = np.exp(y) / np.sum(np.exp(y))
        ix = np.random.choice(range(vocab_size), p=p.ravel())
        x = np.zeros((vocab_size, 1))
        x[ix] = 1
        ixes.append(ix)
    return ixes


n, p = 0, 0
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by)  # memory variables for Adagrad
smooth_loss = -np.log(1.0 / vocab_size) * seq_length  # loss at iteration 0
while True:
    # prepare inputs (we're sweeping from left to right in steps seq_length long)
    if p + seq_length + 1 >= len(data) or n == 0:
        hprev = np.zeros((hidden_size, 1))  # reset RNN memory
        p = 0  # go from start of data
    inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]
    targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]

    # sample from the model now and then
    if n % 100 == 0:
        sample_ix = sample(hprev, inputs[0], 200)
        txt = ''.join(ix_to_char[ix] for ix in sample_ix)
        print('----\n %s \n----' % (txt, ))

    # forward seq_length characters through the net and fetch gradient
    loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
    smooth_loss = smooth_loss * 0.999 + loss * 0.001
    if n % 100 == 0:
        print('iter %d, loss: %f' % (n, smooth_loss))  # print progress

    # perform parameter update with Adagrad
    for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
                                  [dWxh, dWhh, dWhy, dbh, dby],
                                  [mWxh, mWhh, mWhy, mbh, mby]):
        mem += dparam * dparam
        param += -learning_rate * dparam / np.sqrt(mem + 1e-8)  # adagrad update

    p += seq_length  # move data pointer
    n += 1  # iteration counter
---- beahngy amo k ns aeo?cdse nh a taei.rairrhelardr nela haeiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr nol e iohahenasen ---- iter 9309400, loss: 0.000086 ---- e nh a taei.rairrhelardr naioa aneaa ayio pe e bhnte ayio pe e h’e btentmuhgehi bcgdltt. gey heho grpiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cds ---- iter 9309500, loss: 0.000086 ---- jCTCnhoofeoxelif edElobe negnk e iohehasenoldndAmdaI ayio pe e h’e btentmuhgehi bcgdltt. gey heho grpiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cds ---- iter 9309600, loss: 0.000086 ---- negnk e iohehasenoldndAmdaI ayio pe e h’e btentmuhgehi bcgdltt. gey heho grpiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr ---- iter 9309700, loss: 0.000086 ---- aI ayio pe e h’e btentmuhgehi bcgdltt. gey heho grpiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr neli ae e angnI hyho gben ---- iter 9309800, loss: 0.000086 ---- gehi bcgdltt. gey heho grpiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr nela dr iohecgrpiahe. Ddelnss.eelaishaner” cot AA ---- iter 9309900, loss: 0.000086 ---- piahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr nol e iohahenasenese hbea bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a t ---- iter 9310000, loss: 0.000086 ---- er” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr nela hamnaI ayio pe e h’e btentmuhgnhi beahe Ddabealohe bee amoi bcgdltt. gey heho grpiahe. 
Ddeln ---- iter 9310100, loss: 0.000086 ---- bih a”on bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr nol gyio pe e h’e btentmuhgehi bcgdltt. gey heho grpiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae ---- iter 9310200, loss: 0.000086 ---- beahngy amo k ns aeo?cdse nh a taei.rairrhelardr ntlhnegnns. e amo k ns aeh?cdse nh a taei.rairrhelardr nol e iohehengrpiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeah ---- iter 9310300, loss: 0.000086 ---- e nh a taei.rairrhelardr nol’e btentmuhgehi gcdslatha arenbggcodaeta tehr he ni.rhelaney gehnha e ar i ho bee amote ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr nol nyio chge heiohecgr ---- iter 9310400, loss: 0.000086 ---- jCTCnhoofeoxelif edElobe negnk e iohehasenoldndAmdaI ayio pe e h’e btentmuhgehi bcgdltt. gey heho grpiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cds ---- iter 9310500, loss: 0.000086 ---- negnk e iohehasenoldndAmdaI ayio pe e h’e btentmuhgehi bcgdltt. gey heho grpiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr ---- iter 9310600, loss: 0.000086 ---- aI ayio pe e h’e btentmuhgehi bcgdltt. gey heho grpiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr nelardae abeahngy amo k ---- iter 9310700, loss: 0.000086 ---- gehi bcgdltt. gey heho grpiahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr ntl negnk t hi rsnse nhk br ne” a naeiarairr elirs ---- iter 9310800, loss: 0.000086 ---- piahe. Ddelnss.eelaishaner” cot AAfhB ht ltny ehbih a”on bhnte ectrsnae abeahngy amo k ns aeo?cdse nh a taei.rairrhelardr nelardaenabeahngelareierhi. aif edElobe negrcih gey gey heho grpiahe. Ddel ----
<!-- The core Firebase JS SDK is always required and must be listed first -->
<script src="https://www.gstatic.com/firebasejs/6.6.2/firebase-app.js"></script>
<!-- TODO: Add SDKs for Firebase products that you want to use
https://firebase.google.com/docs/web/setup#available-libraries -->
<script>
// Your web app's Firebase configuration
// NOTE(review): a Firebase web apiKey identifies the project and is not a
// secret by itself, but confirm database security rules are configured
// before publishing this config publicly.
var firebaseConfig = {
apiKey: "AsdfDQ3uOXYZp_MrNQPSauO4sdfsPn4OU8",
authDomain: "coreal.firebaseapp.com",
databaseURL: "https://coreal.firebaseio.com",
projectId: "coreal",
storageBucket: "",
messagingSenderId: "997454353605",
appId: "1:997454353605:web:5354b7543a6d9e68fdcad2" };
// Initialize Firebase
firebase.initializeApp(firebaseConfig);
</script>
@firebase/database: FIREBASE WARNING: Provided authentication credentials for the app named "[DEFAULT]" are invalid. This usually indicates your app was not initialized correctly. Make sure the "credential" property provided to initializeApp() is authorized to access the specified "databaseURL" and is from the correct project.
한글 사용법 / euc-kr / mbcs / convert encoding / decoding / 인코딩 변경 방법 / nodejs / node / 노드에서 인코딩 변경 / euckr / korean / utf8 / 한글파일 읽기 / 한글 읽는 법 / 파일 읽기 / read 하는 법 / write 하는 법 / euc-kr 읽기 /euckr 읽기 / euc kr excel / 엑셀 읽기
iconv 를 설치하려면 build 환경(native code compilation)이 구성되어야 한다.
npm i iconv
이녀석은 순수 js 로 구성되었다. README 에서는 어떤 경우에는 iconv
보다 빠르다고 이야기 한다.
npm i iconv-lite
아래처럼 사용할 수 있다.
// Encode a JS (UTF-16) string to EUC-KR bytes before writing, so the output
// file (e.g. a CSV opened in Korean Excel) is EUC-KR encoded.
// assumes `fs` is required and `filename` is defined by surrounding code — TODO confirm
const iconv = require('iconv-lite');
const stream = fs.createWriteStream(filename);
const header = ['이름', '주소'];
// join the header cells with commas, then convert to an EUC-KR Buffer
const buf = iconv.encode(header.join(','), 'euc-kr');
stream.write(buf);
stream.end();
euc-kr 로 된 file을 read 할 때는 아래처럼 하면 된다.
import * as fs from 'fs';
import iconv from 'iconv-lite'

try {
  // readFileEncoding: read the raw bytes, then decode EUC-KR into a JS string.
  // assumes `inputPath` is defined by surrounding code — TODO confirm
  const data = await fs.promises.readFile(inputPath)
  const utf8Data = iconv.decode(data, 'euc-kr')
  console.log(utf8Data)
} catch (err) {
  // FIX: the original had `return console.error(...)` here, but `return` is a
  // SyntaxError at module top level — just log the failure instead.
  console.error('Error reading file:', err);
}
카카오톡: https://cs.kakao.com/helps?articleId=470002560&service=8&category=24&device=1&locale=ko
// latest version
// <root>\node_modules\@adonisjs\framework\src\Encryption\index.js
const Encryptor = require('simple-encryptor')
...
class Encryption {
// appKey: the application secret used as the encryption key.
// options: extra simple-encryptor options merged on top of { key: appKey }.
constructor (appKey, options) {
/**
 * Throw exception when app key doesn't exists.
 */
if (!appKey) {
throw GE.RuntimeException.missingAppKey('Encryption')
}
this.appKey = appKey
// Newer versions pass an options object, so hmac/debug/reviver can be tuned.
this.encryptor = Encryptor(Object.assign({ key: appKey }, options))
}
...
// old version
// <root>\node_modules\@adonisjs\framework\src\Encryption\index.js
const Encryptor = require('simple-encryptor')
...
class Encryption {
// Older versions read the key from Config and pass it as a bare string,
// which simple-encryptor expands to { key, hmac: true, debug: false }.
constructor (Config) {
const appKey = Config.get('app.appKey')
/**
 * Throw exception when app key doesn't exists.
 */
if (!appKey) {
throw GE.RuntimeException.missingAppKey('Encryption')
}
this.encryptor = Encryptor(appKey)
}
...
// simple-encryptor
// Factory: accepts either a key string or an options object.
module.exports = function(opts) {
// A bare string key is shorthand for { key, hmac: true, debug: false }.
if( typeof(opts) == 'string' ) {
opts = {
key: opts,
hmac: true,
debug: false
};
}
var key = opts.key;
var verifyHmac = opts.hmac;
var debug = opts.debug;
var reviver = opts.reviver;
...
Changed default digest for the dgst and enc commands from MD5 to sha256
라고 나온다. crypto module 을 사용하고 있다면 주의가 필요하다. 예를 들어 아래와 같은 에러가 발생할 수 있다.
error:06065064:digital envelope routines:EVP_DecryptFinal_ex:bad decrypt
var cryptoKey = crypto.createHash('sha256').update(key).digest();
Which branch should be used for bringing forth production releases? - develop - master Branch name for production releases: [master] Which branch should be used for integration of the "next release"? - develop Branch name for "next release" development: [develop] How to name your supporting branch prefixes? Feature branches? [feature/] Bugfix branches? [bugfix/] Release branches? [release/] Hotfix branches? [hotfix/] Support branches? [support/] Version tag prefix? [] Hooks and filters directory? [C:/a/prog/mainsource/.git/hooks]
git flow feature start
를 이용해서 feature branch 를 만들 수 있다. 이때 어느 branch 에서 만들어도 develop branch에서 만들어준다.
- 실제로 일어나는 일을 설명하자면 일단 두 브랜치가 나뉘기 전인 공통 커밋으로 이동하고 나서
- 그 커밋부터 지금 Checkout한 브랜치가 가리키는 커밋까지 diff를 차례로 만들어
- 어딘가에 임시로 저장해 놓는다.
- Rebase할 브랜치(역주 - experiment)가 합칠 브랜치(역주 - master)가 가리키는 커밋을 가리키게 하고
- 아까 저장해 놓았던 변경사항을 차례대로 적용한다.
그림 3-29는 이러한 과정을 나타내고 있다.
rss reader / rss feed reader / 비번 설정 / 텔레 비번 설정 / 텔레그램에서 전화번호 변경 방법 /
https://t.me/rss2tg_bot : 이 봇(bot)을 이용하면 된다.
일정너비 이상 telegram desktop(pc버전) 의 너비(width) 를 늘리면, 말풍선이 왼쪽으로 모입니다.
이것은 내가 online 인지 여부나, 마지막 접속 시간등을 상대방과 공유할 것인지 여부를 정할 수 있다.
참고로, 상대방과 공유하지 않으면, 나도 상대방의 online 여부 등을 알 수 없다. 그리고 공유하지 않아도, 접속시간이, '최근', '일주일내' 등등 으로 상대방에게 알려진다.
Settings \--\> Privacy & security(개인 정보 및 보안) \--\> Privacy 부분 \--\> Last seen & online
내 전화번호가 상대방에게 노출되는 것을 막을 수 있다.
그리고 상대방이 내 번호를 등록했다고 해도, 내가 상대방의 번호를 contacts 에 등록하지 않으면, 상대방은 내 번호를 확인할 수 없다. 홍콩 시위 때 중국 정부가 이 점을 악용하려 하자, 텔레그램이 업데이트로 이를 막았다.(참고: 텔레그램, 곧 업데이트… 중국 당국의 홍콩 시위자 신원 추적 가능성 따라)
비밀번호를 설정 하는 방법이다.
'안전한 메신저' 텔레그램도 뚫렸나? 기사에서도 보듯이 sms 를 통제당하게 되면(sim 카드복사등) 뚫리게 된다. 그래서 "2단계 로그인" 설정을 해놓는 것이 좋다.
일정시간 텔레그램에 접속하지 않으면, 계정을 삭제한다. 이때 이 일정기간을 정할 수 있다. 최대 1년이다.
번호를 변경하면, 그 번호로 모든 정보가 옮겨진다.
기본적으로 그림이나, 파일등은 일정크기 이하라면, 자동으로 다운로드 된다. 이 부분에 대한 설정을 할 수 있다.
이 기능을 켜면 복사, 캡쳐 모두 안된다. 전달(forward)도 안된다.
폴더로 채팅방을 구분할 수 있다.
https://telegram.org/blog/instant-view#telegraph
telegraph
프리미엄에서 얻을 수 있는 혜택
t.me/name
// saga.js
import { put, takeEvery, all } from 'redux-saga/effects'
// Resolve after `ms` milliseconds — used to simulate async work in sagas.
const delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
// A trivial saga that logs a greeting once when started.
export function* helloSaga() {
  const greeting = 'Hello Sagas!';
  console.log(greeting);
}
// ...
// Worker saga: waits one second, then asks the store to INCREMENT.
export function* incrementAsync() {
  yield delay(1000);
  const increment = { type: 'INCREMENT' };
  yield put(increment);
}
// Watcher saga: forks incrementAsync for every INCREMENT_ASYNC action seen.
export function* watchIncrementAsync() {
  const pattern = 'INCREMENT_ASYNC';
  yield takeEvery(pattern, incrementAsync);
}
// notice how we now only export the rootSaga
// Single entry point: start every child saga in parallel via all().
export default function* rootSaga() {
  const children = [helloSaga(), watchIncrementAsync()];
  yield all(children);
}
// main.js
import "babel-polyfill"
import React from 'react'
import ReactDOM from 'react-dom'
import { createStore, applyMiddleware } from 'redux'
import Counter from './Counter'
import reducer from './reducers'
// the package's default export (sagaMiddlewareFactory) bound to the name createSagaMiddleware
import createSagaMiddleware from 'redux-saga'
// import { rootSaga } from './sagas'
import rootSaga from './sagas'
// const store = createStore(reducer)
// Register the saga middleware as a store enhancer.
const sagaMiddleware = createSagaMiddleware()
const store = createStore(
reducer,
applyMiddleware(sagaMiddleware)
)
// Start the root saga; must be called after the store is created.
sagaMiddleware.run(rootSaga)
// Tiny helper that dispatches an action containing only a `type` field.
const action = type => store.dispatch({type})
function render() {
ReactDOM.render(
<Counter
value={store.getState()}
onIncrement={() => action('INCREMENT')}
onDecrement={() => action('DECREMENT')}
onIncrementAsync={() => action('INCREMENT_ASYNC')} />,
document.getElementById('root')
)
}
render()
// Re-render on every state change.
store.subscribe(render)
// redux source excerpt: applyMiddleware returns a store enhancer that rebuilds
// dispatch as a chain of middlewares wrapped around the original store.dispatch.
export default function applyMiddleware(...middlewares) {
  return createStore => (...args) => {
    // pass the createStore as an argument
    const store = createStore(...args)
    let dispatch = () => {
      throw new Error(
        'Dispatching while constructing your middleware is not allowed. ' +
        'Other middleware would not be applied to this dispatch.'
      )
    }
    const middlewareAPI = {
      getState: store.getState,
      dispatch: (...args) => dispatch(...args)
    }
    // Wrap the existing dispatch with every middleware to build the new dispatch.
    const chain = middlewares.map(middleware => middleware(middlewareAPI))
    dispatch = compose(...chain)(store.dispatch)
    return { ...store, dispatch }
  }
}
...
export default function createStore(reducer, preloadedState, enhancer) {
  ...
  if (typeof enhancer !== 'undefined') {
    if (typeof enhancer !== 'function') {
      throw new Error('Expected the enhancer to be a function.')
    }
    // Hand store construction over to the enhancer (e.g. applyMiddleware above).
    return enhancer(createStore)(reducer, preloadedState)
  }
  ...
}
proc(env, iterator, context, effectId, getMetaInfo(saga), /* isRoot */ true, noop)
// saga.js
// (the application's rootSaga, quoted again to trace what sagaMiddleware.run receives)
export default function* rootSaga() {
yield all([
helloSaga(),
watchIncrementAsync()
])
}
...
// runSaga.js function runSaga(_ref, saga) { ... var iterator = saga.apply(void 0, args); ... return immediately(function () { var task = proc(env, iterator, context, effectId, __chunk_1.getMetaInfo(saga), /* isRoot */ true, __chunk_1.noop); if (sagaMonitor) { sagaMonitor.effectResolved(effectId, task); } return task; }); }
// Library excerpt: proc() drives a saga generator to completion via the
// recursive next() continuation below.
function proc(env, iterator, parentContext, parentEffectId, meta, isRoot, cont) {
  if (iterator[__chunk_1.asyncIteratorSymbol]) {
    throw new Error("redux-saga doesn't support async generators, please use only regular ones");
  }
  ...
  next(); // then return the task descriptor to the caller
  return task;
  /**
   * This is the generator driver
   * It's a recursive async/continuation function which calls itself
   * until the generator terminates or throws
   * @param {internal commands(TASK_CANCEL | TERMINATE) | any} arg - value, generator will be resumed with.
   * @param {boolean} isErr - the flag shows if effect finished with an error
   *
   * receives either (command | effect result, false) or (any thrown thing, true)
   */
  function next(arg, isErr) {
    try {
      var result;
      if (isErr) {
        result = iterator.throw(arg); // user handled the error, we can clear bookkept values
        clear();
      } else if (__chunk_1.shouldCancel(arg)) {
        /**
          getting TASK_CANCEL automatically cancels the main task
          We can get this value here
          - By cancelling the parent task manually
          - By joining a Cancelled task
        **/
        ...
      } else if (__chunk_1.shouldTerminate(arg)) {
        // We get TERMINATE flag, i.e. by taking from a channel that ended using `take` (and not `takem` used to trap End of channels)
        ...
      } else {
        result = iterator.next(arg);
      }
      ...
    } catch (error) {
      if (mainTask.status === CANCELLED) {
        throw error;
      }
      mainTask.status = ABORTED;
      mainTask.cont(error, true);
    }
  }
  ...
}
yield takeEvery('INCREMENT_ASYNC', incrementAsync)
간단히 이야기 하면, await/async 를 사용하게 해주는 것이라 보면 된다.
아래 글을 읽으면 자세한 이야기를 알 수 있다.
// Compute an HMAC-SHA256 of a message with Node's crypto module and print it
// as a hex string. digest() consumes the hmac — it cannot be reused after.
const crypto = require('crypto');
const hmac = crypto.createHmac('sha256', 'a secret');
hmac.update('some data to hash');
console.log(hmac.digest('hex'));
// Stream rows from the `point` table and wrap the stream's lifecycle in a
// Promise so the surrounding async function can await completion.
const ccursor = Database.connection('cocktail')
  .raw(
    `SELECT * FROM point AS t1 WHERE t1.id !='tester' ORDER BY id ASC`
  ).stream()
const prom = new Promise((resolve, reject)=>{
  Logger.error('test2');
  // fires once per row
  ccursor.on('data', (row)=>{
    Logger.info('test-data');
  })
  // NOTE(review): this also logs 'test-data' — presumably 'test-end' was meant; verify.
  ccursor.on('end', () => {
    Logger.info('test-data');
    resolve()
  })
  ccursor.on('error', (param1) => {
    Logger.error('test-error : ' + param1);
    reject()
  })
});
await prom.then((val)=>{
  Logger.info('test-resolve');
}).catch((val)=>{
  Logger.error('test-reject');
});
Logger.info('test-data');
가 계속 실행되다가 끝날때 Logger.info('test-data');
가 실행될 것이다.
const prom = new Promise((resolve, reject)=>{
const ccursor =
// open a row stream over the g5_point table (AdonisJS Database)
Database.connection('cocktail')
.raw(
`SELECT * FROM g5_point AS t1 WHERE t1.mb_id !='admin'
ORDER BY po_id ASC`
).stream();
// fires once per row
ccursor.on('data', (row)=>{
Logger.error('test-data');
})
// settle the wrapping Promise when the stream finishes…
ccursor.on('end', () => {
Logger.error('test-end');
resolve()
})
// …or fails
ccursor.on('error', (param1) => {
Logger.error('test-error : ' + param1);
reject()
})
});
await prom.then((val)=>{
Logger.error('test-resolve');
}).catch((val)=>{
Logger.error('test-reject');
});
// Same as above, but with async callbacks everywhere.
// NOTE(review): an async Promise executor is generally an anti-pattern — an
// exception thrown inside it is swallowed rather than rejecting the Promise;
// confirm this variant is intentional.
const prom = new Promise(async (resolve, reject)=>{
  const ccursor = Database.connection('cocktail')
    .raw(
      `SELECT * FROM g5_point AS t1 WHERE t1.mb_id !='admin' ORDER BY po_id ASC`
    ).stream();
  ccursor.on('data', async (row)=>{
    Logger.error('test-data');
  })
  ccursor.on('end', async () => {
    Logger.error('test-end');
    resolve()
  })
  ccursor.on('error', async (param1) => {
    Logger.error('test-error : ' + param1);
    reject()
  })
});
await prom.then((val)=>{
  Logger.error('test-resolve');
}).catch((val)=>{
  Logger.error('test-reject');
});